diff --git a/README.md b/README.md
index 3e841034cbb4d1704f3dd4e2b2378c94d9d8718f..a74f4443fd46dec3331378d813e5749cdbd71076 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,18 @@
 # Real Python Materials
 
-Bonus materials, exercises, and example projects for our [Python tutorials](https://realpython.com).
+Bonus materials, exercises, and example projects for Real Python's [Python tutorials](https://realpython.com).
 
 Build Status: [![CircleCI](https://circleci.com/gh/realpython/materials.svg?style=svg)](https://circleci.com/gh/realpython/materials)
 
-## Running Code Style Checks
+## Got a Question?
+
+The best way to get support for Real Python courses & articles and code in this repository is to join one of our [weekly Office Hours calls](https://realpython.com/office-hours/) or to ask your question in the [RP Community Slack](https://realpython.com/community/).
+
+Due to time constraints we cannot provide 1:1 support via GitHub. See you on Slack or on the next Office Hours call 🙂
+
+## Adding Source Code & Sample Projects to This Repo (RP Contributors)
+
+### Running Code Style Checks
 
 We use [flake8](http://flake8.pycqa.org/en/latest/) and [black](https://github.com/ambv/black) to ensure a consistent code style for all of our sample code in this repository.
 
@@ -15,7 +23,7 @@
 $ flake8
 $ black --check .
 ```
 
-## Running Python Code Formatter
+### Running Python Code Formatter
 
 We're using a tool called [black](https://github.com/ambv/black) on this repo to ensure consistent formatting. On CI it runs in "check" mode to ensure any new files added to the repo are following PEP 8. If you see linter warnings that say something like "would reformat some_file.py" it means black disagrees with your formatting.
diff --git a/nlp-sentiment-analysis/sentiment_analyzer.py b/nlp-sentiment-analysis/sentiment_analyzer.py
index 3c815abc764efa9ed257566e68d3eb9be0da19bd..f03bb30eb8399b13ef9acfef1095442f8e3a190b 100644
--- a/nlp-sentiment-analysis/sentiment_analyzer.py
+++ b/nlp-sentiment-analysis/sentiment_analyzer.py
@@ -79,19 +79,19 @@ def evaluate_model(tokenizer, textcat, test_data: list) -> dict:
     true_negatives = 0
     false_negatives = 1e-8
     for i, review in enumerate(textcat.pipe(reviews)):
-        true_label = labels[i]
+        true_label = labels[i]["cats"]
         for predicted_label, score in review.cats.items():
             # Every cats dictionary includes both labels, you can get all
             # the info you need with just the pos label
             if predicted_label == "neg":
                 continue
-            if score >= 0.5 and true_label == "pos":
+            if score >= 0.5 and true_label["pos"]:
                 true_positives += 1
-            elif score >= 0.5 and true_label == "neg":
+            elif score >= 0.5 and true_label["neg"]:
                 false_positives += 1
-            elif score < 0.5 and true_label == "neg":
+            elif score < 0.5 and true_label["neg"]:
                 true_negatives += 1
-            elif score < 0.5 and true_label == "pos":
+            elif score < 0.5 and true_label["pos"]:
                 false_negatives += 1
     precision = true_positives / (true_positives + false_positives)
     recall = true_positives / (true_positives + false_negatives)
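Reviewer note on the `sentiment_analyzer.py` hunk: the new indexing implies that each `test_data` entry pairs a review with a spaCy-style annotation dict, so the true label is a dict of booleans rather than a `"pos"`/`"neg"` string. A minimal sketch of that assumption follows; the sample reviews below are hypothetical and only illustrate the expected shape of the data.

```python
# Hypothetical test_data entries in the spaCy "cats" annotation format that
# the updated evaluate_model() indexing (labels[i]["cats"]) expects.
test_data = [
    ("This movie was great!", {"cats": {"pos": True, "neg": False}}),
    ("Total waste of time.", {"cats": {"pos": False, "neg": True}}),
]

reviews, labels = zip(*test_data)
true_label = labels[0]["cats"]  # {"pos": True, "neg": False}

# Against this dict, a string comparison such as `true_label == "pos"` is
# always False, so checking the boolean stored under each key (as the diff
# does) is what lets the precision/recall counters increment.
assert true_label["pos"] and not true_label["neg"]
```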