From eee8d4e24a80e897dba47c3f596d449ee352c3d2 Mon Sep 17 00:00:00 2001
From: Julio Biason
Date: Wed, 13 Oct 2021 13:28:54 -0300
Subject: [PATCH] Little Sister's Vocabulary

---
 .../.exercism/config.json | 10 +
 .../.exercism/metadata.json | 1 +
 python/little-sisters-vocab/HELP.md | 63 ++++
 python/little-sisters-vocab/HINTS.md | 37 ++
 python/little-sisters-vocab/README.md | 340 ++++++++++++++++++
 python/little-sisters-vocab/strings.py | 51 +++
 python/little-sisters-vocab/strings_test.py | 93 +++++
 7 files changed, 595 insertions(+)
 create mode 100644 python/little-sisters-vocab/.exercism/config.json
 create mode 100644 python/little-sisters-vocab/.exercism/metadata.json
 create mode 100644 python/little-sisters-vocab/HELP.md
 create mode 100644 python/little-sisters-vocab/HINTS.md
 create mode 100644 python/little-sisters-vocab/README.md
 create mode 100644 python/little-sisters-vocab/strings.py
 create mode 100644 python/little-sisters-vocab/strings_test.py

diff --git a/python/little-sisters-vocab/.exercism/config.json b/python/little-sisters-vocab/.exercism/config.json
new file mode 100644
index 0000000..d7eecdc
--- /dev/null
+++ b/python/little-sisters-vocab/.exercism/config.json
@@ -0,0 +1,10 @@
+{
+  "blurb": "Learn about strings by helping your little sister with her vocabulary homework.",
+  "icon": "two-fer",
+  "authors": ["aldraco", "bethanyg"],
+  "files": {
+    "solution": ["strings.py"],
+    "test": ["strings_test.py"],
+    "exemplar": [".meta/exemplar.py"]
+  }
+}
diff --git a/python/little-sisters-vocab/.exercism/metadata.json b/python/little-sisters-vocab/.exercism/metadata.json
new file mode 100644
index 0000000..c0794bb
--- /dev/null
+++ b/python/little-sisters-vocab/.exercism/metadata.json
@@ -0,0 +1 @@
+{"track":"python","exercise":"little-sisters-vocab","id":"a8a01b8effce459095fbd5b0edc84d77","url":"https://exercism.org/tracks/python/exercises/little-sisters-vocab","handle":"JBiason","is_requester":true,"auto_approve":false}
\ No newline at end of file
diff --git a/python/little-sisters-vocab/HELP.md b/python/little-sisters-vocab/HELP.md
new file mode 100644
index 0000000..e9dfff8
--- /dev/null
+++ b/python/little-sisters-vocab/HELP.md
@@ -0,0 +1,63 @@
+# Help
+
+## Running the tests
+
+You can run the included tests by typing `pytest <exercise_name>_test.py` on the command line from within the exercise's directory.
+
+You can also tell Python to run the pytest module on the command line, either from within the exercise directory or with a path to the exercise directory:
+`python -m pytest <exercise_name>_test.py` from within the exercise directory.
+
+`python -m pytest /fully/qualified/path/to/<exercise_name>/` OR `python -m pytest relative/path/to/<exercise_name>/` from a non-exercise directory.
+
+Many IDEs and code editors also have built-in support for using PyTest to run tests.
+
+- [Visual Studio Code](https://code.visualstudio.com/docs/python/testing)
+- [PyCharm Professional & Community Editions](https://www.jetbrains.com/help/pycharm/pytest.html#create-pytest-test)
+- [Atom](https://atom.io/packages/atom-python-test)
+- [Spyder](https://www.spyder-ide.org/blog/introducing-unittest-plugin/)
+- [Sublime](https://github.com/kaste/PyTest)
+- [vim-test](https://github.com/vim-test/vim-test)
+
+See the [Python tests page](https://github.com/exercism/python/blob/main/docs/TESTS.md) for more information.
+
+### Common `pytest` options
+
+- `-v` : enable verbose output.
+- `-x` : stop running tests on first failure.
+- `--ff` : run failures from the previous test run before running other test cases.
+
+For other options, see `python -m pytest -h`.
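+
+For example, to stop at the first failure and run previously failed tests first, the options above can be combined (shown here with this exercise's test file, `strings_test.py`):
+
+```bash
+python -m pytest -x --ff strings_test.py
+```
+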
+PyTest documentation can be found [here](https://docs.pytest.org/en/latest/getting-started.html).
+
+## Submitting your solution
+
+You can submit your solution using the `exercism submit strings.py` command.
+This command will upload your solution to the Exercism website and print the solution page's URL.
+
+It's possible to submit an incomplete solution which allows you to:
+
+- See how others have completed the exercise
+- Request help from a mentor
+
+## Need to get help?
+
+If you'd like help solving the exercise, check the following pages:
+
+- The [Python track's documentation](https://exercism.org/docs/tracks/python)
+- [Exercism's support channel on gitter](https://gitter.im/exercism/support)
+- The [Frequently Asked Questions](https://exercism.org/docs/using/faqs)
+
+Should those resources not suffice, you could submit your (incomplete) solution to request mentoring.
+
+Below are some resources for getting help if you run into trouble:
+
+- [The PSF](https://www.python.org) hosts Python downloads, documentation, and community resources.
+- [Python Community on Discord](https://pythondiscord.com/) is a very helpful and active community.
+- [#python on Libera.chat](https://www.python.org/community/irc/) is where the core developers for the language hang out and get work done.
+- [Exercism on Gitter](https://gitter.im/exercism/home) join the Python room for Python-related questions or problems.
+- [/r/learnpython/](https://www.reddit.com/r/learnpython/) is a subreddit designed for Python learners.
+- [Python Community Forums](https://discuss.python.org/)
+- [Pythontutor](http://pythontutor.com/) for stepping through small code snippets visually.
+
+
+Additionally, [StackOverflow](http://stackoverflow.com/questions/tagged/python) is a good spot to search for your problem/question to see if it has been answered already.
+If not, you can always [ask](https://stackoverflow.com/help/how-to-ask) or [answer](https://stackoverflow.com/help/how-to-answer) someone else's question.
\ No newline at end of file
diff --git a/python/little-sisters-vocab/HINTS.md b/python/little-sisters-vocab/HINTS.md
new file mode 100644
index 0000000..2aefe8c
--- /dev/null
+++ b/python/little-sisters-vocab/HINTS.md
@@ -0,0 +1,37 @@
+# Hints
+
+## General
+
+- The [Python Docs Tutorial for strings][python-str-doc] has an overview of the Python `str` type.
+- String methods [.join()][str-join] and [.split()][str-split] are very helpful when processing strings.
+- The [Python Docs on Sequence Types][common sequence operations] has a rundown of operations common to all sequences, including `strings`, `lists`, `tuples`, and `ranges`.
+
+There are four activities in the assignment, each with a set of text or words to work with.
+
+## 1. Add a prefix to a word
+
+- Small strings can be concatenated with the `+` operator.
+
+## 2. Add prefixes to word groups
+
+- Believe it or not, `.join()` is all you need.
+- Like `.split()`, `.join()` can take an arbitrary-length string, made up of any unicode code points.
+
+## 3. Remove a suffix from a word
+
+- Strings can be both indexed and sliced from either the left (starting at 0) or the right (starting at -1).
+- If you want the last code point of an arbitrary-length string, you can use `[-1]`.
+- The last three letters in a string can be "sliced off" using a negative index. e.g. `'beautiful'[:-3] == 'beauti'`
+
+## 4. Extract and transform a word
+
+- Using `.split()` returns a list of strings broken on white space.
+- `lists` are sequences, and can be indexed.
+- `.split()` can be directly indexed. e.g. `'Exercism rocks!'.split()[0] == 'Exercism'`
+- Be careful of punctuation! Periods can be removed via slice: `'dark.'[:-1] == 'dark'`
+
+[python-str-doc]: https://docs.python.org/3/tutorial/introduction.html#strings
+
+[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str
+[str-join]: https://docs.python.org/3/library/stdtypes.html#str.join
+[str-split]: https://docs.python.org/3/library/stdtypes.html#str.split
\ No newline at end of file
diff --git a/python/little-sisters-vocab/README.md b/python/little-sisters-vocab/README.md
new file mode 100644
index 0000000..cc50eb8
--- /dev/null
+++ b/python/little-sisters-vocab/README.md
@@ -0,0 +1,340 @@
+# Little Sister's Vocabulary
+
+Welcome to Little Sister's Vocabulary on Exercism's Python Track.
+If you need help running the tests or submitting your code, check out `HELP.md`.
+If you get stuck on the exercise, check out `HINTS.md`, but try and solve it without using those first :)
+
+## Introduction
+
+A `str` in Python is an [immutable sequence][text sequence] of [Unicode code points][unicode code points].
+These could include letters, diacritical marks, positioning characters, numbers, currency symbols, emoji, punctuation, space and line break characters, and more.
+Being immutable, a `str` object's value in memory doesn't change; methods that appear to modify a string return a new copy or instance of that `str` object.
+
+A `str` literal can be declared via single `'` or double `"` quotes. The escape `\` character is available as needed.
+
+```python
+>>> single_quoted = 'These allow "double quoting" without "escape" characters.'
+
+>>> double_quoted = "These allow embedded 'single quoting', so you don't have to use an 'escape' character."
+
+>>> escapes = 'If needed, a \'slash\' can be used as an escape character within a string when switching quote styles won\'t work.'
+```
+
+Multi-line strings are declared with `'''` or `"""`.
+
+```python
+>>> triple_quoted = '''Three single quotes or "double quotes" in a row allow for multi-line string literals.
+  Line break characters, tabs and other whitespace are fully supported.
+
+  You\'ll most often encounter these as "doc strings" or "doc tests" written just below the first line of a function or class definition.
+  They\'re often used with auto documentation ✍ tools.
+  '''
+```
+
+Strings can be concatenated using the `+` operator.
+This method should be used sparingly, as it is not very performant or easily maintained.
+
+```python
+language = "Ukrainian"
+number = "nine"
+word = "девять"
+
+sentence = word + " " + "means" + " " + number + " in " + language + "."
+
+>>> print(sentence)
+...
+"девять means nine in Ukrainian."
+```
+
+If a `list`, `tuple`, `set` or other collection of individual strings needs to be combined into a single `str`, [`.join()`][str-join] is a better option:
+
+```python
+# str.join() makes a new string from the iterable's elements.
+>>> chickens = ["hen", "egg", "rooster"]
+>>> ' '.join(chickens)
+'hen egg rooster'
+
+# Any string can be used as the joining element.
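+# That includes the empty string:
+>>> ''.join(chickens)
+'heneggrooster'
+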
+>>> ' :: '.join(chickens)
+'hen :: egg :: rooster'
+
+>>> ' 🌿 '.join(chickens)
+'hen 🌿 egg 🌿 rooster'
+```
+
+Code points within a `str` can be referenced by `0-based index` number from the left:
+
+```python
+creative = '창의적인'
+
+>>> creative[0]
+'창'
+
+>>> creative[2]
+'적'
+
+>>> creative[3]
+'인'
+```
+
+Indexing also works from the right, starting with a `-1-based index`:
+
+```python
+creative = '창의적인'
+
+>>> creative[-4]
+'창'
+
+>>> creative[-2]
+'적'
+
+>>> creative[-1]
+'인'
+```
+
+There is no separate "character" or "rune" type in Python, so indexing a string produces a new `str` of length 1:
+
+```python
+>>> website = "exercism"
+>>> type(website[0])
+<class 'str'>
+
+>>> len(website[0])
+1
+
+>>> website[0] == website[0:1] == 'e'
+True
+```
+
+Substrings can be selected via _slice notation_, using [`[start:stop:step]`][common sequence operations] to produce a new string.
+Results exclude the `stop` index.
+If no `start` is given, the starting index will be 0.
+If no `stop` is given, the `stop` index will be the end of the string.
+
+```python
+moon_and_stars = '🌟🌟🌙🌟🌟⭐'
+sun_and_moon = '🌞🌙🌞🌙🌞🌙🌞🌙🌞'
+
+>>> moon_and_stars[1:4]
+'🌟🌙🌟'
+
+>>> moon_and_stars[:3]
+'🌟🌟🌙'
+
+>>> moon_and_stars[3:]
+'🌟🌟⭐'
+
+>>> moon_and_stars[:-1]
+'🌟🌟🌙🌟🌟'
+
+>>> moon_and_stars[:-3]
+'🌟🌟🌙'
+
+>>> sun_and_moon[::2]
+'🌞🌞🌞🌞🌞'
+
+>>> sun_and_moon[:-2:2]
+'🌞🌞🌞🌞'
+
+>>> sun_and_moon[1:-1:2]
+'🌙🌙🌙🌙'
+```
+
+Strings can also be broken into smaller strings via [`.split()`][str-split], which will return a `list` of substrings.
+The list can then be further indexed or split, if needed.
+Using `.split()` without any arguments will split the string on whitespace.
+
+```python
+>>> cat_ipsum = "Destroy house in 5 seconds mock the hooman."
+>>> cat_ipsum.split()
+...
+['Destroy', 'house', 'in', '5', 'seconds', 'mock', 'the', 'hooman.']
+
+
+>>> cat_ipsum.split()[-1]
+'hooman.'
+
+
+>>> cat_words = "feline, four-footed, ferocious, furry"
+>>> cat_words.split(',')
+...
+['feline', ' four-footed', ' ferocious', ' furry']
+```
+
+Separators for `.split()` can be more than one character.
+The **whole string** is used for split matching.
+
+```python
+>>> colors = """red,
+orange,
+green,
+purple,
+yellow"""
+
+>>> colors.split(',\n')
+['red', 'orange', 'green', 'purple', 'yellow']
+```
+
+Strings support all [common sequence operations][common sequence operations].
+Individual code points can be iterated through in a loop via `for item in <str>`.
+Indexes _with_ items can be iterated through in a loop via `for index, item in enumerate(<str>)`.
+
+```python
+>>> exercise = 'လေ့ကျင့်'
+
+# Note that there are more code points than perceived glyphs or characters
+>>> for code_point in exercise:
+...     print(code_point)
+...
+လ
+ေ
+့
+က
+ျ
+င
+်
+့
+
+# Using enumerate will give both the value and index position of each element.
+>>> for index, code_point in enumerate(exercise):
+...     print(index, ": ", code_point)
+...
+0 : လ
+1 : ေ
+2 : ့
+3 : က
+4 : ျ
+5 : င
+6 : ်
+7 : ့
+```
+
+[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#common-sequence-operations
+[str-join]: https://docs.python.org/3/library/stdtypes.html#str.join
+[str-split]: https://docs.python.org/3/library/stdtypes.html#str.split
+[text sequence]: https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str
+[unicode code points]: https://stackoverflow.com/questions/27331819/whats-the-difference-between-a-character-a-code-point-a-glyph-and-a-grapheme
+
+## Instructions
+
+You are helping your younger sister with her English vocabulary homework, which she's finding very tedious.
+Her class is learning to create new words by adding _prefixes_ and _suffixes_.
+Given a set of words, the teacher is looking for correctly transformed words with correct spelling, made by adding the prefix to the beginning or the suffix to the end of a word.
+
+There are four activities in the assignment, each with a set of text or words to work with.
+
+## 1. Add a prefix to a word
+
+One of the most common prefixes in English is `un`, meaning "not".
+In this activity, your sister needs to make negative, or "not", words by adding `un` to them.
+
+Implement the `add_prefix_un()` function that takes `word` as a parameter and returns a new `un` prefixed word:
+
+```python
+>>> add_prefix_un("happy")
+'unhappy'
+
+>>> add_prefix_un("manageable")
+'unmanageable'
+```
+
+## 2. Add prefixes to word groups
+
+There are four more common prefixes that your sister's class is studying:
+`en` (_meaning to 'put into' or 'cover with'_),
+`pre` (_meaning 'before' or 'forward'_),
+`auto` (_meaning 'self' or 'same'_),
+and `inter` (_meaning 'between' or 'among'_).
+
+In this exercise, the class is creating groups of vocabulary words using these prefixes, so they can be studied together.
+Each prefix comes in a list with common words it's used with.
+The students need to apply the prefix and produce a string that shows the prefix applied to all of the words.
+
+Implement the `make_word_groups()` function that takes a `vocab_words` parameter in the following form:
+`[<prefix>, <word_1>, <word_2>, ... <word_n>]`, and returns a string with the prefix applied to each word that looks like:
+`'<prefix> :: <prefix><word_1> :: <prefix><word_2> :: ... <prefix><word_n>'`.
+
+```python
+>>> make_word_groups(['en', 'close', 'joy', 'lighten'])
+'en :: enclose :: enjoy :: enlighten'
+
+>>> make_word_groups(['pre', 'serve', 'dispose', 'position'])
+'pre :: preserve :: predispose :: preposition'
+
+>>> make_word_groups(['auto', 'didactic', 'graph', 'mate'])
+'auto :: autodidactic :: autograph :: automate'
+
+>>> make_word_groups(['inter', 'twine', 'connected', 'dependent'])
+'inter :: intertwine :: interconnected :: interdependent'
+```
+
+## 3. Remove a suffix from a word
+
+`ness` is a common suffix that means _'state of being'_.
+In this activity, your sister needs to find the original root word by removing the `ness` suffix.
+But of course there are pesky spelling rules: if the root word originally ended in a consonant followed by a 'y', then the 'y' was changed to 'i'.
+Removing 'ness' needs to restore the 'y' in those root words. e.g. `happiness` --> `happi` --> `happy`.
+
+Implement the `remove_suffix_ness()` function that takes in a word `str`, and returns the root word without the `ness` suffix.
+
+```python
+>>> remove_suffix_ness("heaviness")
+'heavy'
+
+>>> remove_suffix_ness("sadness")
+'sad'
+```
+
+## 4. Extract and transform a word
+
+Suffixes are often used to change the part of speech a word has.
+A common practice in English is "verbing" or "verbifying" -- where an adjective _becomes_ a verb by adding an `en` suffix.
+
+In this task, your sister is going to practice "verbing" words by extracting an adjective from a sentence and turning it into a verb.
+Fortunately, all the words that need to be transformed here are "regular" - they don't need spelling changes to add the suffix.
+
+Implement the `noun_to_verb(<sentence>, <index>)` function that takes two parameters:
+a `sentence` using the vocabulary word, and the `index` of the word once that sentence is split apart.
+The function should return the extracted adjective as a verb.
+
+```python
+>>> noun_to_verb('I need to make that bright.', -1)
+'brighten'
+
+>>> noun_to_verb('It got dark as the sun set.', 2)
+'darken'
+```
+
+## Source
+
+### Created by
+
+- @aldraco
+- @bethanyg
\ No newline at end of file
diff --git a/python/little-sisters-vocab/strings.py b/python/little-sisters-vocab/strings.py
new file mode 100644
index 0000000..eab1a4a
--- /dev/null
+++ b/python/little-sisters-vocab/strings.py
@@ -0,0 +1,51 @@
+def add_prefix_un(word: str) -> str:
+    """This function takes `word` as a parameter and returns a new word with an
+    'un' prefix.
+
+    :param word: str of a root word
+    :return: str of root word with un prefix
+    """
+    return f'un{word}'
+
+
+def make_word_groups(vocab_words):
+    """This function takes a `vocab_words` list and returns a string with the
+    prefix and the words with prefix applied, separated by ' :: '.
+
+    :param vocab_words: list of vocabulary words with a prefix.
+    :return: str of prefix followed by vocabulary words with
+             prefix applied, separated by ' :: '.
+    """
+    prefix = vocab_words[0]
+    return ' :: '.join(
+        [prefix]
+        + [f'{prefix}{word}'
+           for word
+           in vocab_words[1:]]
+    )
+
+
+def remove_suffix_ness(word):
+    """This function takes in a word and returns the base word with `ness`
+    removed.
+
+    :param word: str of word to remove suffix from.
+    :return: str of word with suffix removed & spelling adjusted.
+    """
+    no_suffix = word[:-4]
+    if no_suffix[-1] == 'i':
+        no_suffix = no_suffix[:-1] + 'y'
+    return no_suffix
+
+
+def noun_to_verb(sentence, index):
+    """This function takes a `sentence` using the vocabulary word, and the
+    `index` of the word once that sentence is split apart. It returns the
+    extracted adjective as a verb.
+
+    :param sentence: str that uses the word in a sentence
+    :param index: index of the word to extract and transform
+    :return: str word that changes the extracted adjective to a verb.
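+
+    Example (from the README)::
+
+        noun_to_verb('It got dark as the sun set.', 2)  # returns 'darken'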
+ """ + word = sentence.split()[index].replace('.', '') + return f'{word}en' diff --git a/python/little-sisters-vocab/strings_test.py b/python/little-sisters-vocab/strings_test.py new file mode 100644 index 0000000..f081946 --- /dev/null +++ b/python/little-sisters-vocab/strings_test.py @@ -0,0 +1,93 @@ +import unittest +import pytest +from strings import (add_prefix_un, + make_word_groups, + remove_suffix_ness, + noun_to_verb) + + +class LittleSistersVocabTest(unittest.TestCase): + + @pytest.mark.task(taskno=1) + def test_add_prefix_un(self): + input_data = ["happy", "manageable", "fold", "eaten", "avoidable", "usual"] + result_data = [f'un{item}' for item in input_data] + number_of_variants = range(1, len(input_data) + 1) + + for variant, word, result in zip(number_of_variants, input_data, result_data): + with self.subTest(f"variation #{variant}", word=word, result=result): + self.assertEqual(add_prefix_un(word), result, + msg=f'Expected: {result} but got a different word instead.') + + @pytest.mark.task(taskno=2) + def test_make_word_groups_en(self): + input_data = ['en', 'circle', 'fold', 'close', 'joy', 'lighten', 'tangle', 'able', 'code', 'culture'] + result_data = 'en :: encircle :: enfold :: enclose :: enjoy :: enlighten :: entangle :: enable :: encode :: enculture' + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + @pytest.mark.task(taskno=2) + def test_make_word_groups_pre(self): + input_data = ['pre', 'serve', 'dispose', 'position', 'requisite', 'digest', + 'natal', 'addressed', 'adolescent', 'assumption', 'mature', 'compute'] + result_data = ('pre :: preserve :: predispose :: preposition :: prerequisite :: ' + 'predigest :: prenatal :: preaddressed :: preadolescent :: preassumption :: ' + 'premature :: precompute') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + @pytest.mark.task(taskno=2) + def test_make_word_groups_auto(self): + input_data = ['auto', 'didactic', 'graph', 'mate', 'chrome', 'centric', 'complete', + 'echolalia', 'encoder', 'biography'] + result_data = ('auto :: autodidactic :: autograph :: automate :: autochrome :: ' + 'autocentric :: autocomplete :: autoecholalia :: autoencoder :: ' + 'autobiography') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + @pytest.mark.task(taskno=2) + def test_make_words_groups_inter(self): + input_data = ['inter', 'twine', 'connected', 'dependent', 'galactic', 'action', + 'stellar', 'cellular', 'continental', 'axial', 'operative', 'disciplinary'] + result_data = ('inter :: intertwine :: interconnected :: interdependent :: ' + 'intergalactic :: interaction :: interstellar :: intercellular :: ' + 'intercontinental :: interaxial :: interoperative :: interdisciplinary') + + self.assertEqual(make_word_groups(input_data), result_data, + msg=f'Expected {result_data} but got something else instead.') + + @pytest.mark.task(taskno=3) + def test_remove_suffix_ness(self): + input_data = ["heaviness", "sadness", "softness", "crabbiness", "lightness", "artiness", "edginess"] + result_data = ["heavy", "sad", "soft", 'crabby', 'light', 'arty', 'edgy'] + number_of_variants = range(1, len(input_data) + 1) + + for variant, word, result in zip(number_of_variants, input_data, result_data): + with self.subTest(f"variation #{variant}", word=word, result=result): + self.assertEqual(remove_suffix_ness(word), 
result, + msg=f'Expected: {result} but got a different word instead.') + + @pytest.mark.task(taskno=4) + def test_noun_to_verb(self): + input_data = ['Look at the bright sky.', + 'His expression went dark.', + 'The bread got hard after sitting out.', + 'The butter got soft in the sun.', + 'Her face was filled with light.', + 'The morning fog made everything damp with mist.', + 'He cut the fence pickets short by mistake.', + 'Charles made weak crying noises.', + 'The black oil got on the white dog.'] + index_data = [-2, -1, 3, 3, -1, -3, 5, 2, 1] + result_data = ['brighten', 'darken', 'harden', 'soften', + 'lighten', 'dampen', 'shorten', 'weaken', 'blacken'] + number_of_variants = range(1, len(input_data) + 1) + + for variant, sentence, index, result in zip(number_of_variants, input_data, index_data, result_data): + with self.subTest(f"variation #{variant}", sentence=sentence, index=index, result=result): + self.assertEqual(noun_to_verb(sentence, index), result, + msg=f'Expected: {result} but got a different word instead.')