Question: Can anyone help me add one or two basic unit tests for any functions in the core file?

core.py file:
""" Wiki core ~~~~~~~~~ """ from collections import OrderedDict from io import open import os import re
from flask import abort from flask import url_for import markdown


def clean_url(url):
    """
    Cleans the url and corrects various errors. Removes multiple
    spaces and all leading and trailing spaces. Changes spaces to
    underscores and makes all characters lowercase. Also takes care
    of Windows style folders use.

    :param str url: the url to clean

    :returns: the cleaned url
    :rtype: str
    """
    url = re.sub('[ ]{2,}', ' ', url).strip()
    url = url.lower().replace(' ', '_')
    url = url.replace('\\\\', '/').replace('\\', '/')
    return url
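
# A quick illustration of clean_url() (not part of the original file,
# just an example of the expected behaviour):
#
#   >>> clean_url('  Some\\Windows  Path  ')
#   'some/windows_path'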


def wikilink(text, url_formatter=None):
    """
    Processes Wikilink syntax "[[Link]]" within the html body. This
    is intended to be run after content has been processed by
    markdown and is already HTML.

    :param str text: the html to highlight wiki links in.
    :param function url_formatter: which URL formatter to use, will
        by default use the flask url formatter

    Syntax:
        This accepts Wikilink syntax in the form of [[WikiLink]] or
        [[url/location|LinkName]]. Everything is referenced from the
        base location "/", therefore sub-pages need to use the
        [[page/subpage|Subpage]].

    :returns: the processed html
    :rtype: str
    """
    if url_formatter is None:
        url_formatter = url_for
    link_regex = re.compile(
        r"((?<!\<code\>)\[\[([^<].+?) \s*([|] \s* (.+?) \s*)?]])",
        re.X | re.U
    )
    for i in link_regex.findall(text):
        title = [i[-1] if i[-1] else i[1]][0]
        url = clean_url(i[1])
        # build the HTML anchor tag for the link
        html_url = u"<a href='{0}'>{1}</a>".format(
            url_formatter('wiki.display', url=url),
            title
        )
        text = re.sub(link_regex, html_url, text, count=1)
    return text
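
# For example, with a formatter that simply prefixes the url with "/"
# (illustration only, not part of the original file):
#
#   >>> wikilink(u'[[target|Target]]', lambda endpoint, url: '/' + url)
#   u"<a href='/target'>Target</a>"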


class Processor(object):
    """
    The processor handles the processing of file content into
    metadata and markdown and takes care of the rendering.

    It also offers some helper methods that can be used for various
    cases.
    """

    preprocessors = []
    postprocessors = [wikilink]

    def __init__(self, text):
        """
        Initialization of the processor.

        :param str text: the text to process
        """
        # newer versions of the markdown package require the
        # ``extensions`` keyword argument
        self.md = markdown.Markdown(extensions=[
            'codehilite',
            'fenced_code',
            'meta',
            'tables'
        ])
        self.input = text
        self.markdown = None
        self.meta_raw = None

        self.pre = None
        self.html = None
        self.final = None
        self.meta = None

    def process_pre(self):
        """
        Content preprocessor.
        """
        current = self.input
        for processor in self.preprocessors:
            current = processor(current)
        self.pre = current

    def process_markdown(self):
        """
        Convert to HTML.
        """
        self.html = self.md.convert(self.pre)

    def split_raw(self):
        """
        Split text into raw meta and content.
        """
        self.meta_raw, self.markdown = self.pre.split('\n\n', 1)

    def process_meta(self):
        """
        Get metadata.

        .. warning:: Can only be called after :meth:`html` was
            called.
        """
        # the markdown meta plugin does not retain the order of the
        # entries, so we have to loop over the meta values a second
        # time to put them into a dictionary in the correct order
        self.meta = OrderedDict()
        for line in self.meta_raw.split('\n'):
            key = line.split(':', 1)[0]
            # markdown metadata always returns a list of lines, we
            # will reverse that here
            self.meta[key.lower()] = \
                '\n'.join(self.md.Meta[key.lower()])

    def process_post(self):
        """
        Content postprocessor.
        """
        current = self.html
        for processor in self.postprocessors:
            current = processor(current)
        self.final = current

    def process(self):
        """
        Runs the full suite of processing on the given text, all
        pre and post processing, markdown rendering and meta data
        handling.
        """
        self.process_pre()
        self.process_markdown()
        self.split_raw()
        self.process_meta()
        self.process_post()

        return self.final, self.markdown, self.meta
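
# Behaviour sketch (not part of the original file): process() returns
# the rendered html, the raw markdown body and the ordered metadata:
#
#   >>> html, body, meta = Processor(u'title: Hi\n\nHello!').process()
#   >>> meta['title']
#   u'Hi'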


class Page(object):
    def __init__(self, path, url, new=False):
        self.path = path
        self.url = url
        self._meta = OrderedDict()
        if not new:
            self.load()
            self.render()

    def __repr__(self):
        return u"<Page: {}@{}>".format(self.url, self.path)

    def load(self):
        with open(self.path, 'r', encoding='utf-8') as f:
            self.content = f.read()

    def render(self):
        processor = Processor(self.content)
        self._html, self.body, self._meta = processor.process()

    def save(self, update=True):
        folder = os.path.dirname(self.path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        with open(self.path, 'w', encoding='utf-8') as f:
            for key, value in self._meta.items():
                line = u'%s: %s\n' % (key, value)
                f.write(line)
            f.write(u'\n')
            f.write(self.body.replace(u'\r\n', u'\n'))
        if update:
            self.load()
            self.render()

    @property
    def meta(self):
        return self._meta

    def __getitem__(self, name):
        return self._meta[name]

    def __setitem__(self, name, value):
        self._meta[name] = value

    @property
    def html(self):
        return self._html

    def __html__(self):
        return self.html

    @property
    def title(self):
        try:
            return self['title']
        except KeyError:
            return self.url

    @title.setter
    def title(self, value):
        self['title'] = value

    @property
    def tags(self):
        try:
            return self['tags']
        except KeyError:
            return ""

    @tags.setter
    def tags(self, value):
        self['tags'] = value
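
# Example round trip (sketch, not part of the original file): a new
# Page can be created, given metadata and a body, then saved, which
# re-loads and re-renders it from disk:
#
#   >>> page = Page('/tmp/demo.md', 'demo', new=True)
#   >>> page.title = u'Demo'
#   >>> page.body = u'Hello!'
#   >>> page.save()
#   >>> page.title
#   u'Demo'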


class Wiki(object):
    def __init__(self, root):
        self.root = root

    def path(self, url):
        return os.path.join(self.root, url + '.md')

    def exists(self, url):
        path = self.path(url)
        return os.path.exists(path)

    def get(self, url):
        path = self.path(url)
        if self.exists(url):
            return Page(path, url)
        return None

    def get_or_404(self, url):
        page = self.get(url)
        if page:
            return page
        abort(404)

    def get_bare(self, url):
        path = self.path(url)
        if self.exists(url):
            return False
        return Page(path, url, new=True)

    def move(self, url, newurl):
        source = os.path.join(self.root, url) + '.md'
        target = os.path.join(self.root, newurl) + '.md'
        # normalize root path (just in case somebody defined it
        # absolute, having some '../' inside) to correctly compare
        # it to the target
        root = os.path.normpath(self.root)
        # get root path longest common prefix with normalized target
        # path
        common = os.path.commonprefix((root, os.path.normpath(target)))
        # the common prefix must be at least as long as the root,
        # otherwise there are probably some '..' links in the target
        # path leading us outside the defined root directory
        if len(common) < len(root):
            raise RuntimeError(
                'Possible write attempt outside content directory: '
                '%s' % newurl)
        # create folder if it does not exist yet
        folder = os.path.dirname(target)
        if not os.path.exists(folder):
            os.makedirs(folder)
        os.rename(source, target)

    def delete(self, url):
        path = self.path(url)
        if not self.exists(url):
            return False
        os.remove(path)
        return True

    def index(self):
        """
        Builds up a list of all the available pages.

        :returns: a list of all the wiki pages
        :rtype: list
        """
        # make sure we always have the absolute path for fixing the
        # walk path
        pages = []
        root = os.path.abspath(self.root)
        for cur_dir, _, files in os.walk(root):
            # get the url of the current directory
            cur_dir_url = cur_dir[len(root)+1:]
            for cur_file in files:
                path = os.path.join(cur_dir, cur_file)
                if cur_file.endswith('.md'):
                    url = clean_url(
                        os.path.join(cur_dir_url, cur_file[:-3]))
                    page = Page(path, url)
                    pages.append(page)
        return sorted(pages, key=lambda x: x.title.lower())

    def index_by(self, key):
        """
        Get an index based on the given key.

        Will use the metadata value of the given key to group the
        existing pages.

        :param str key: the attribute to group the index on.

        :returns: Will return a dictionary where each entry holds
            a list of pages that share the given attribute.
        :rtype: dict
        """
        pages = {}
        for page in self.index():
            value = getattr(page, key)
            # note: list.append() returns None, so the list has to
            # be stored after appending rather than assigning the
            # return value of append()
            pre = pages.get(value, [])
            pre.append(page)
            pages[value] = pre
        return pages
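
    # Example (sketch, not part of the original file): group all
    # pages by their title metadata, then look one group up:
    #
    #   >>> wiki.index_by('title').get(u'Test')   # -> list of pages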

    def get_by_title(self, title):
        # index() takes no arguments, so build the grouped index
        # via index_by() and look the title up there
        pages = self.index_by('title')
        return pages.get(title)

    def get_tags(self):
        pages = self.index()
        tags = {}
        for page in pages:
            pagetags = page.tags.split(',')
            for tag in pagetags:
                tag = tag.strip()
                if tag == '':
                    continue
                elif tags.get(tag):
                    tags[tag].append(page)
                else:
                    tags[tag] = [page]
        return tags

    def index_by_tag(self, tag):
        pages = self.index()
        tagged = []
        for page in pages:
            if tag in page.tags:
                tagged.append(page)
        return sorted(tagged, key=lambda x: x.title.lower())

    def search(self, term, ignore_case=True, attrs=['title', 'tags', 'body']):
        pages = self.index()
        regex = re.compile(term, re.IGNORECASE if ignore_case else 0)
        matched = []
        for page in pages:
            for attr in attrs:
                if regex.search(getattr(page, attr)):
                    matched.append(page)
                    break
        return matched
```
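To address the actual question, here are two small, self-contained unit tests for `clean_url()` and `wikilink()`. This is a minimal sketch: it assumes the module is importable as `wiki.core`, and the file name `test_basic_core.py` and the `dummy_formatter` helper are just illustrative stand-ins (the formatter replaces `flask.url_for` so no Flask application context is needed).

```python
# test_basic_core.py -- a minimal sketch; assumes the package is
# importable as wiki.core and requires no Flask app context
from unittest import TestCase

from wiki.core import clean_url, wikilink


def dummy_formatter(endpoint, url):
    # stands in for flask.url_for so no application context is needed
    return u"/{}".format(url)


class BasicCoreTestCase(TestCase):

    def test_clean_url_normalizes_input(self):
        # runs of spaces collapse to single underscores, the case is
        # lowered and backslashes become forward slashes
        self.assertEqual(
            clean_url('  My Folder\\Sub  Page  '),
            'my_folder/sub_page'
        )

    def test_wikilink_renders_anchor(self):
        # [[page|Title]] becomes a plain HTML anchor tag
        self.assertEqual(
            wikilink(u'[[page|Title]]', dummy_formatter),
            u"<a href='/page'>Title</a>"
        )
```

These can be run with `python -m unittest` or pytest; neither test touches the filesystem.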
test_core.py file:
```python
# -*- coding: utf-8 -*-
from io import open
import os
from unittest import TestCase

from mock import patch

from wiki.core import clean_url
from wiki.core import wikilink
from wiki.core import Page
from wiki.core import Processor

from . import WikiBaseTestCase
PAGE_CONTENT = u"""\ title: Test tags: one, two, 3, j
Hello, how are you guys?
**Is it not _magnificent_**? """
CONTENT_HTML = u"""\
Hello, how are you guys?
Is it not magnificent?
"""WIKILINK_PAGE_CONTENT = u"""\ title: link
[[target]] """
WIKILINK_CONTENT_HTML = u"""\
target
"""def simple_url_formatter(endpoint, url): """ A simple URL formatter to use when no application context is available.
:param str endpoint: the endpoint to use. :param str url: the URL to format """ return u"/{}".format(url)
def wikilink_simple_url_formatter(text): """ A wikilink function that uses the simple URL formatter.
:param str text: the text to format. """ return wikilink(text, simple_url_formatter)


class SimpleWikilinkProcessor(Processor):
    """
    As the processor can currently not take arguments for
    preprocessors we need to temporarily subclass it to overwrite
    it with the simple URL formatter.
    """
    postprocessors = [wikilink_simple_url_formatter]


class URLCleanerTestCase(TestCase):
    """
    Contains various tests for the url cleaner.
    """

    def test_clean_simple_url(self):
        """
        Assert a simple URL remains unchanged.
        """
        simple_url = '/test'
        assert clean_url(simple_url) == simple_url

    def test_clean_deep_url(self):
        """
        Assert a deep URL remains unchanged.
        """
        deep_url = '/test/two/three/yo'
        assert clean_url(deep_url) == deep_url

    def test_handle_spaces(self):
        """
        Assert that unnecessary spaces will be removed and all
        other spaces correctly substituted.
        """
        assert (clean_url(' /hello you/wonderful/person ')
                == '/hello_you/wonderful/person')

    def test_handle_uppercase(self):
        """
        Assert that uppercase characters will be substituted.
        """
        assert clean_url("HELLo") == "hello"


class WikilinkTestCase(TestCase):
    """
    Contains various tests for the wikilink parser.
    """

    def test_simple_wikilink(self):
        """
        Assert a simple wikilink is converted correctly.
        """
        formatted = wikilink(u'[[target]]', simple_url_formatter)
        assert formatted == "<a href='/target'>target</a>"

    def test_titled_wikilink(self):
        """
        Assert a wikilink with a title will be converted correctly.
        """
        formatted = wikilink(u'[[target|Target]]', simple_url_formatter)
        assert formatted == "<a href='/target'>Target</a>"

    def test_multiple_wikilinks(self):
        """
        Assert a text with multiple wikilinks will be converted
        correctly.
        """
        formatted = wikilink(
            u'[[target|Target]] is better than [[alternative]]',
            simple_url_formatter
        )
        assert formatted == (
            "<a href='/target'>Target</a> is better than"
            " <a href='/alternative'>alternative</a>"
        )


class ProcessorTestCase(WikiBaseTestCase):
    """
    Contains various tests for the :class:`~wiki.core.Processor`
    class.
    """

    page_content = PAGE_CONTENT

    def setUp(self):
        super(ProcessorTestCase, self).setUp()
        self.processor = Processor(self.page_content)

    def test_process(self):
        """
        Assert processing works correctly.
        """
        html, original, meta = self.processor.process()

        assert html == CONTENT_HTML
        assert original == PAGE_CONTENT.split(u'\n\n', 1)[1]
        assert meta == {
            'title': u'Test',
            'tags': u'one, two, 3, j'
        }

    def test_process_wikilinks(self):
        """
        Assert that wikilinks are processed correctly.
        """
        self.processor = SimpleWikilinkProcessor(WIKILINK_PAGE_CONTENT)
        html, _, _ = self.processor.process()
        assert html == WIKILINK_CONTENT_HTML


class PageTestCase(WikiBaseTestCase):
    """
    Contains various tests for the :class:`~wiki.core.Page` class.
    """

    page_content = PAGE_CONTENT

    def setUp(self):
        super(PageTestCase, self).setUp()
        self.page_path = self.create_file('test.md', self.page_content)
        self.page = Page(self.page_path, 'test')

    def test_page_loading(self):
        """
        Assert that content is loaded correctly from disk.
        """
        assert self.page.content == PAGE_CONTENT

    def test_page_meta(self):
        """
        Assert meta data is interpreted correctly.
        """
        assert self.page.title == u'Test'
        assert self.page.tags == u'one, two, 3, j'

    def test_page_saving(self):
        """
        Assert that saving a page back to disk persists it
        correctly.
        """
        self.page.save()
        with open(self.page_path, 'r', encoding='utf-8') as fhd:
            saved = fhd.read()
        assert saved == self.page_content


class WikiTestCase(WikiBaseTestCase):
    """
    Contains various tests for the :class:`~wiki.core.Wiki` class.
    """

    def test_simple_file_detection(self):
        """
        Assert a test markdown file is correctly detected.
        """
        self.create_file('test.md')
        assert self.wiki.exists('test') is True

    def test_wrong_extension_detection(self):
        """
        Assert a non markdown file is ignored.
        """
        self.create_file('test.txt')
        assert self.wiki.exists('test') is False

    def test_config_is_unreadable(self):
        """
        Assert that the content file cannot be loaded as a page.
        """
        # the config file is automatically created, so we can just
        # run tests without having to worry about anything
        assert self.wiki.exists('config') is False
        assert self.wiki.exists('config.py') is False

    def test_delete(self):
        """
        Assert deleting a URL will delete the file.
        """
        self.create_file('test.md')
        self.wiki.delete("test")
        assert not os.path.exists(os.path.join(self.rootdir, 'test.md'))

    def test_index(self):
        """
        Assert index correctly lists all the files.
        """
        self.create_file('test.md', PAGE_CONTENT)
        self.create_file('one/two/three.md', WIKILINK_PAGE_CONTENT)
        with patch('wiki.core.Processor', new=SimpleWikilinkProcessor):
            pages = self.wiki.index()
        assert len(pages) == 2

        # as the index return should be sorted by the title, the
        # first page should always be the link one and the other
        # one the second
        deeptestpage = pages[0]
        assert deeptestpage.url == 'one/two/three'

        testpage = pages[1]
        assert testpage.url == 'test'
```
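Note that `WikiBaseTestCase` is imported from the test package and not shown above; judging from its usage it provides a temporary content root (`self.rootdir`), a ready `Wiki` instance (`self.wiki`) and a `create_file()` helper. Purely as an assumption, a matching base class could look like this:

```python
# hypothetical sketch of the WikiBaseTestCase used by the tests above
import os
import shutil
import tempfile
from io import open
from unittest import TestCase

from wiki.core import Wiki


class WikiBaseTestCase(TestCase):

    def setUp(self):
        # every test gets a fresh, temporary content directory
        self.rootdir = tempfile.mkdtemp()
        self.wiki = Wiki(self.rootdir)

    def tearDown(self):
        shutil.rmtree(self.rootdir, ignore_errors=True)

    def create_file(self, name, content=u''):
        # write a (possibly nested) file under the content root and
        # return its full path
        path = os.path.join(self.rootdir, name)
        folder = os.path.dirname(path)
        if folder and not os.path.exists(folder):
            os.makedirs(folder)
        with open(path, 'w', encoding='utf-8') as f:
            f.write(content)
        return path
```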